Add missing memory barriers to the asynchronous shared-memory ring communication code (block, network, and control interfaces): snapshot producer indexes with rmb() before consuming entries, and issue wmb() before publishing producer indexes.
#include <scsi/scsi.h>
#include <asm/ctrl_if.h>
-
-
typedef unsigned char byte; /* from linux/ide.h */
#define BLKIF_STATE_CLOSED 0
static inline void flush_requests(void)
{
DISABLE_SCATTERGATHER();
+ wmb(); /* Ensure that the backend can see the requests. */
blk_ring->req_prod = req_prod;
notify_via_evtchn(blkif_evtchn);
}
static void blkif_int(int irq, void *dev_id, struct pt_regs *ptregs)
{
- BLKIF_RING_IDX i;
+ BLKIF_RING_IDX i, rp;
unsigned long flags;
struct buffer_head *bh, *next_bh;
if ( unlikely(blkif_state == BLKIF_STATE_CLOSED || recovery) )
{
- printk("Bailed out\n");
-
spin_unlock_irqrestore(&io_request_lock, flags);
return;
}
- for ( i = resp_cons; i != blk_ring->resp_prod; i++ )
+ rp = blk_ring->resp_prod;
+ rmb(); /* Ensure we see queued responses up to 'rp'. */
+
+ for ( i = resp_cons; i != rp; i++ )
{
blkif_response_t *bret = &blk_ring->ring[MASK_BLKIF_IDX(i)].resp;
switch ( bret->operation )
control_if_t *ctrl_if = get_ctrl_if();
ctrl_msg_t *msg;
int was_full = TX_FULL(ctrl_if);
+ CONTROL_RING_IDX rp;
- while ( ctrl_if_tx_resp_cons != ctrl_if->tx_resp_prod )
+ rp = ctrl_if->tx_resp_prod;
+ rmb(); /* Ensure we see all responses up to 'rp'. */
+
+ while ( ctrl_if_tx_resp_cons != rp )
{
msg = &ctrl_if->tx_ring[MASK_CONTROL_IDX(ctrl_if_tx_resp_cons)];
static void __ctrl_if_rxmsg_deferred(void *unused)
{
ctrl_msg_t *msg;
+ CONTROL_RING_IDX dp;
+
+ dp = ctrl_if_rxmsg_deferred_prod;
+ rmb(); /* Ensure we see all deferred requests up to 'dp'. */
- while ( ctrl_if_rxmsg_deferred_cons != ctrl_if_rxmsg_deferred_prod )
+ while ( ctrl_if_rxmsg_deferred_cons != dp )
{
msg = &ctrl_if_rxmsg_deferred[MASK_CONTROL_IDX(
ctrl_if_rxmsg_deferred_cons++)];
{
control_if_t *ctrl_if = get_ctrl_if();
ctrl_msg_t msg, *pmsg;
+ CONTROL_RING_IDX rp, dp;
- while ( ctrl_if_rx_req_cons != ctrl_if->rx_req_prod )
+ dp = ctrl_if_rxmsg_deferred_prod;
+ rp = ctrl_if->rx_req_prod;
+ rmb(); /* Ensure we see all requests up to 'rp'. */
+
+ while ( ctrl_if_rx_req_cons != rp )
{
pmsg = &ctrl_if->rx_ring[MASK_CONTROL_IDX(ctrl_if_rx_req_cons++)];
memcpy(&msg, pmsg, offsetof(ctrl_msg_t, msg));
if ( test_bit(msg.type,
(unsigned long *)&ctrl_if_rxmsg_blocking_context) )
- {
- pmsg = &ctrl_if_rxmsg_deferred[MASK_CONTROL_IDX(
- ctrl_if_rxmsg_deferred_prod++)];
- memcpy(pmsg, &msg, offsetof(ctrl_msg_t, msg) + msg.length);
+ memcpy(&ctrl_if_rxmsg_deferred[MASK_CONTROL_IDX(dp++)],
+ &msg, offsetof(ctrl_msg_t, msg) + msg.length);
+ else
+ (*ctrl_if_rxmsg_handler[msg.type])(&msg, 0);
+ }
+
+ if ( dp != ctrl_if_rxmsg_deferred_prod )
+ {
+ wmb(); /* Ensure deferred messages are in place before publishing 'dp'. */
+ ctrl_if_rxmsg_deferred_prod = dp;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
- schedule_task(&ctrl_if_rxmsg_deferred_tq);
+ schedule_task(&ctrl_if_rxmsg_deferred_tq);
#else
- schedule_work(&ctrl_if_rxmsg_deferred_work);
+ schedule_work(&ctrl_if_rxmsg_deferred_work);
#endif
- }
- else
- {
- (*ctrl_if_rxmsg_handler[msg.type])(&msg, 0);
- }
}
}
{
blkif_ring_t *blk_ring = blkif->blk_ring_base;
blkif_request_t *req;
- BLKIF_RING_IDX i;
+ BLKIF_RING_IDX i, rp;
int more_to_do = 0;
+ rp = blk_ring->req_prod;
+ rmb(); /* Ensure we see queued requests up to 'rp'. */
+
/* Take items off the comms ring, taking care not to overflow. */
for ( i = blkif->blk_req_cons;
- (i != blk_ring->req_prod) && ((i-blkif->blk_resp_prod) !=
- BLKIF_RING_SIZE);
+ (i != rp) && ((i-blkif->blk_resp_prod) != BLKIF_RING_SIZE);
i++ )
{
if ( (max_to_do-- == 0) || (NR_PENDING_REQS == MAX_PENDING_REQS) )
resp->id = id;
resp->operation = op;
resp->status = st;
- wmb();
+ wmb(); /* Ensure other side can see the response fields. */
blkif->blk_ring_base->resp_prod = ++blkif->blk_resp_prod;
spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
static inline void flush_requests(void)
{
+ wmb(); /* Ensure that the backend can see the requests. */
blk_ring->req_prod = req_prod;
notify_via_evtchn(blkif_evtchn);
}
{
struct request *req;
blkif_response_t *bret;
- BLKIF_RING_IDX i;
+ BLKIF_RING_IDX i, rp;
unsigned long flags;
spin_lock_irqsave(&blkif_io_lock, flags);
- if (unlikely(blkif_state == BLKIF_STATE_CLOSED || recovery)) {
- printk("Bailed out\n");
-
+ if ( unlikely(blkif_state == BLKIF_STATE_CLOSED) ||
+ unlikely(recovery) )
+ {
spin_unlock_irqrestore(&blkif_io_lock, flags);
return IRQ_HANDLED;
}
- for (i = resp_cons; i != blk_ring->resp_prod; i++) {
+ rp = blk_ring->resp_prod;
+ rmb(); /* Ensure we see queued responses up to 'rp'. */
+
+ for ( i = resp_cons; i != rp; i++ )
+ {
bret = &blk_ring->ring[MASK_BLKIF_IDX(i)].resp;
- switch (bret->operation) {
+ switch ( bret->operation )
+ {
case BLKIF_OP_READ:
case BLKIF_OP_WRITE:
- if (unlikely(bret->status != BLKIF_RSP_OKAY))
+ if ( unlikely(bret->status != BLKIF_RSP_OKAY) )
DPRINTK("Bad return from blkdev data request: %lx\n",
bret->status);
req = (struct request *)bret->id;
- /* XXXcl pass up status */
- if (unlikely(end_that_request_first(req, 1,
- req->hard_nr_sectors)))
+ if ( unlikely(end_that_request_first
+ (req,
+ (bret->status != BLKIF_RSP_OKAY),
+ req->hard_nr_sectors)) )
BUG();
-
end_that_request_last(req);
- blkif_completion( bret, req );
+ blkif_completion(bret, req);
break;
case BLKIF_OP_PROBE:
memcpy(&blkif_control_rsp, bret, sizeof(*bret));
resp_cons = i;
resp_cons_rec = i;
- if (xlbd_blk_queue &&
- test_bit(QUEUE_FLAG_STOPPED, &xlbd_blk_queue->queue_flags)) {
+ if ( (xlbd_blk_queue != NULL) &&
+ test_bit(QUEUE_FLAG_STOPPED, &xlbd_blk_queue->queue_flags) )
+ {
blk_start_queue(xlbd_blk_queue);
/* XXXcl call to request_fn should not be needed but
* we get stuck without... needs investigating
netif_put(netif);
continue;
}
+ rmb(); /* Ensure that we see the request. */
memcpy(&txreq, &netif->tx->ring[MASK_NETIF_TX_IDX(i)].req,
sizeof(txreq));
netif->tx_req_cons++;
*/
static int netctrl_err(int err)
{
- if(err < 0 && !netctrl.err){
+ if ( (err < 0) && !netctrl.err )
netctrl.err = err;
- printk(KERN_WARNING "%s> err=%d\n", __FUNCTION__, err);
- }
return netctrl.err;
}
return 0;
}
-
static void network_tx_buf_gc(struct net_device *dev)
{
NETIF_RING_IDX i, prod;
do {
prod = np->tx->resp_prod;
+ rmb(); /* Ensure we see responses up to 'prod'. */
for ( i = np->tx_resp_cons; i != prod; i++ )
{
if ( rx_mcl[nr_pfns].args[5] != nr_pfns )
panic("Unable to reduce memory reservation\n");
+ /* Above is a suitable barrier to ensure backend will see requests. */
np->rx->req_prod = i;
}
tx->addr = virt_to_machine(skb->data);
tx->size = skb->len;
- wmb();
+ wmb(); /* Ensure that backend will see the request. */
np->tx->req_prod = i + 1;
network_tx_buf_gc(dev);
struct net_private *np = dev->priv;
struct sk_buff *skb;
netif_rx_response_t *rx;
- NETIF_RING_IDX i;
+ NETIF_RING_IDX i, rp;
mmu_update_t *mmu = rx_mmu;
multicall_entry_t *mcl = rx_mcl;
int work_done, budget, more_to_do = 1;
if ( (budget = *pbudget) > dev->quota )
budget = dev->quota;
+ rp = np->rx->resp_prod;
+ rmb(); /* Ensure we see queued responses up to 'rp'. */
+
for ( i = np->rx_resp_cons, work_done = 0;
- (i != np->rx->resp_prod) && (work_done < budget);
+ (i != rp) && (work_done < budget);
i++, work_done++ )
{
rx = &np->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
void netif_resume(void)
{
- ctrl_msg_t cmsg;
- netif_fe_interface_connect_t up;
-// netif_fe_driver_status_changed_t st;
+ ctrl_msg_t cmsg;
+ netif_fe_interface_connect_t up;
struct net_device *dev = NULL;
struct net_private *np = NULL;
int i;
/* Size of a machine page frame. */
#define PAGE_SIZE 4096
+#if defined(__i386__)
+/*
+ * rmb(): a locked memory op acts as a full fence on i386 (works on CPUs
+ * without lfence). wmb(): i386 does not reorder stores against other
+ * stores, so a compiler barrier suffices.
+ */
+#define rmb() __asm__ __volatile__ ( "lock; addl $0,0(%%esp)" : : : "memory" )
+#define wmb() __asm__ __volatile__ ( "" : : : "memory" )
+#else
+#error "Define barriers"
+#endif
+
/*
* *********************** NOTIFIER ***********************
return NULL;
}
+ /* Need to ensure we see the request, despite seeing the index update.*/
+ rmb();
+
cmsg = &cif->tx_ring[MASK_CONTROL_IDX(c)];
xum = PyObject_New(xu_message_object, &xu_message_type);
memcpy(&xum->msg, cmsg, sizeof(*cmsg));
cmsg = &cif->rx_ring[MASK_CONTROL_IDX(p)];
memcpy(cmsg, &xum->msg, sizeof(*cmsg));
+ wmb(); /* Ensure the message body is in the ring before the index update. */
xup->rx_req_prod = cif->rx_req_prod = p + 1;
Py_INCREF(Py_None);
return NULL;
}
+ /* Need to ensure we see the response, despite seeing the index update.*/
+ rmb();
+
cmsg = &cif->rx_ring[MASK_CONTROL_IDX(c)];
xum = PyObject_New(xu_message_object, &xu_message_type);
memcpy(&xum->msg, cmsg, sizeof(*cmsg));
cmsg = &cif->tx_ring[MASK_CONTROL_IDX(p)];
memcpy(cmsg, &xum->msg, sizeof(*cmsg));
+ wmb(); /* Ensure the response body is in the ring before the index update. */
xup->tx_resp_prod = cif->tx_resp_prod = p + 1;
Py_INCREF(Py_None);